In [1]:
%matplotlib qt
In [2]:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from mne import merge_events
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator,
cross_val_multiscore, LinearModel, get_coef)
plt.close('all')  # close any figures left over from a previous run
In [3]:
# Path to the Maxwell-filtered raw recording (file name ends in _sss, so
# presumably SSS-cleaned — confirm).
# NOTE(review): hardcoded absolute local path — consider a configurable data
# directory so the notebook runs on other machines.
data_path = "/Users/lassemadsen/Documents/CFIN_Praktik/MEG_dataCleaned/run_2_raw_sss.fif"
In [4]:
# Open the raw file lazily; the data are loaded into memory later,
# just before filtering (see the load_data().filter(...) cell).
raw = mne.io.read_raw_fif(data_path)
In [5]:
# Extract trigger events; min_duration filters out spurious ultra-short pulses.
events = mne.find_events(raw, min_duration=0.01)
print('Found %s events, first five:' % len(events))
print(events[:5])
In [6]:
# Trigger codes 101-136 appear to encode a 4-quadrant x 9-position design:
# positions 1-3 use codes 101-112, positions 4-6 use 113-124 and positions
# 7-9 use 125-136, cycling through quadrants Q1-Q4 within each band of 12.
# Build the mapping programmatically instead of spelling out all 36 entries.
event_id = {
    'Q%d/%d' % (quad, 3 * band + pos + 1):
        101 + 12 * band + 3 * (quad - 1) + pos
    for band in range(3)
    for quad in range(1, 5)
    for pos in range(3)
}
In [7]:
# Band-pass the continuous data to 1-40 Hz (removes slow drifts and
# high-frequency noise before epoching).
l_freq = 1   # high-pass edge (Hz)
h_freq = 40  # low-pass edge (Hz)
raw.load_data().filter(l_freq=l_freq, h_freq=h_freq)
Out[7]:
In [ ]:
# Interactive browse of the raw data, 10 channels at a time, ordered by
# channel selection groups.
raw.plot(n_channels=10, order='selection')
In [ ]:
# Epoch around every trigger: 50 ms pre- to 200 ms post-stimulus, no
# baseline correction, constant (DC) detrend, and amplitude-based rejection
# of noisy magnetometer/EOG epochs.
tmin, tmax = -0.05, 0.2
baseline = None
reject = {'mag': 4e-12, 'eog': 200e-6}
epochs = mne.Epochs(raw, events=events, event_id=event_id,
                    tmin=tmin, tmax=tmax, baseline=baseline,
                    detrend=0, reject=reject)
In [ ]:
# Interactive epoch browser; block=True halts execution until the window is
# closed, so bad epochs can be marked by hand before continuing.
epochs.plot(block=True)
In [ ]:
# Average the epochs of every condition into one evoked response (36 in
# total).  The original 36 copy-pasted `.average()` lines are replaced by a
# dict comprehension; the flat per-condition names are kept because later
# cells (plot_joint / plot_topomap) refer to them directly.
picks = mne.pick_types(epochs.info, meg=True, eog=False)
evokeds = {cond: epochs[cond].average(picks=picks) for cond in event_id}
(evoked_Q1_1, evoked_Q1_2, evoked_Q1_3, evoked_Q1_4, evoked_Q1_5,
 evoked_Q1_6, evoked_Q1_7, evoked_Q1_8, evoked_Q1_9) = (
    evokeds['Q1/%d' % i] for i in range(1, 10))
(evoked_Q2_1, evoked_Q2_2, evoked_Q2_3, evoked_Q2_4, evoked_Q2_5,
 evoked_Q2_6, evoked_Q2_7, evoked_Q2_8, evoked_Q2_9) = (
    evokeds['Q2/%d' % i] for i in range(1, 10))
(evoked_Q3_1, evoked_Q3_2, evoked_Q3_3, evoked_Q3_4, evoked_Q3_5,
 evoked_Q3_6, evoked_Q3_7, evoked_Q3_8, evoked_Q3_9) = (
    evokeds['Q3/%d' % i] for i in range(1, 10))
(evoked_Q4_1, evoked_Q4_2, evoked_Q4_3, evoked_Q4_4, evoked_Q4_5,
 evoked_Q4_6, evoked_Q4_7, evoked_Q4_8, evoked_Q4_9) = (
    evokeds['Q4/%d' % i] for i in range(1, 10))
In [ ]:
# Butterfly plot of one example condition, restricted to MEG channels.
meg_picks = mne.pick_types(evoked_Q3_2.info, meg=True, eeg=False, eog=False)
evoked_Q3_2.plot(spatial_colors=True, picks=meg_picks)
In [ ]:
# Joint plots (butterfly + topomap insets) for two example conditions.
ts_args = dict(gfp=True)              # overlay global field power
times = [0.035, 0.085, 0.135]         # inset latencies in seconds
topomap_args = dict(sensors=False)
for evoked, label in ((evoked_Q1_2, 'Q1/2'), (evoked_Q3_2, 'Q3/2')):
    evoked.plot_joint(title='Plot af %s' % label, times=times,
                      ts_args=ts_args, topomap_args=topomap_args)
In [ ]:
# One figure per quadrant showing the 0.085 s topography of all nine
# positions.  Refactored from 36 near-identical plot_topomap calls into a
# loop; the calls issued are the same as before (title on the first map,
# colorbar on the last).
# NOTE(review): 10 axes are created for 9 maps — the spare axis appears to
# be left for the colorbar; confirm it renders as intended.
times = 0.085  # latency (s) displayed in every topomap
quadrant_rows = {
    'Q1': [evoked_Q1_1, evoked_Q1_2, evoked_Q1_3, evoked_Q1_4, evoked_Q1_5,
           evoked_Q1_6, evoked_Q1_7, evoked_Q1_8, evoked_Q1_9],
    'Q2': [evoked_Q2_1, evoked_Q2_2, evoked_Q2_3, evoked_Q2_4, evoked_Q2_5,
           evoked_Q2_6, evoked_Q2_7, evoked_Q2_8, evoked_Q2_9],
    'Q3': [evoked_Q3_1, evoked_Q3_2, evoked_Q3_3, evoked_Q3_4, evoked_Q3_5,
           evoked_Q3_6, evoked_Q3_7, evoked_Q3_8, evoked_Q3_9],
    'Q4': [evoked_Q4_1, evoked_Q4_2, evoked_Q4_3, evoked_Q4_4, evoked_Q4_5,
           evoked_Q4_6, evoked_Q4_7, evoked_Q4_8, evoked_Q4_9],
}
for quadrant, row in quadrant_rows.items():
    fig, ax = plt.subplots(1, 10)
    row[0].plot_topomap(times=times, axes=ax[0], show=False, colorbar=False,
                        title=quadrant)
    for i in range(1, 9):
        row[i].plot_topomap(times=times, axes=ax[i], show=False,
                            colorbar=(i == 8))
In [13]:
# Arrange the 36 trigger codes (101-136) as 12 rows of 3: each row is one
# triplet of consecutive codes ("Opdel i tripletter" = split into triplets).
tmp = np.arange(101, 137).reshape(12, 3)
print(tmp)
In [14]:
# Rows 0,3,4,7,8,11 of the triplet table hold the "upper" conditions and
# the remaining rows the "lower" ones (presumably the Q1/Q4 vs Q2/Q3 split
# — confirm against the stimulus layout).  Collapse them onto two event
# codes: 1 = upper, 2 = lower.
upper_ids = tmp[[0, 3, 4, 7, 8, 11], :].ravel()
lower_ids = tmp[[1, 2, 5, 6, 9, 10], :].ravel()
upLow_events = mne.merge_events(mne.merge_events(events, upper_ids, 1),
                                lower_ids, 2)
In [15]:
# Re-epoch around the merged upper/lower codes; decim=4 downsamples the
# epochs to make the decoding below faster.
picks = mne.pick_types(raw.info, meg=True, eog=False, misc=False)
event_id_upLow = {'Upper': 1, 'Lower': 2}
tmin, tmax = -0.05, 0.2
baseline = None
reject = {'mag': 4e-12}
epochs_upLow = mne.Epochs(raw, events=upLow_events, event_id=event_id_upLow,
                          tmin=tmin, tmax=tmax, baseline=baseline, detrend=0,
                          reject=reject, decim=4, picks=picks)
In [16]:
# Time-resolved decoding of upper vs lower: one logistic-regression
# classifier per time point (SlidingEstimator), scored with ROC-AUC.
X = epochs_upLow.get_data() # MEG signals: n_epochs, n_channels, n_times
y = epochs_upLow.events[:, 2] # target: 1 = 'Upper', 2 = 'Lower'
clf = make_pipeline(StandardScaler(), LogisticRegression())
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the AUC time course against the 0.5 chance level
fig, ax = plt.subplots()
ax.plot(epochs_upLow.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
plt.show()
In [17]:
# Temporal generalization for upper vs lower: train a classifier at every
# time point and evaluate it at every other time point.
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Average over the 5 cross-validation folds
scores = np.mean(scores, axis=0)
# The diagonal (train time == test time) reproduces the sliding-estimator
# curve from the previous cell.
fig, ax = plt.subplots()
ax.plot(epochs_upLow.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
plt.show()
# Full training-time x testing-time score matrix
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower',
               cmap='RdBu_r', extent=epochs_upLow.times[[0, -1, 0, -1]],
               vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal Generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
plt.show()
In [8]:
# Triplet table of trigger codes 101-136 (12 rows x 3 codes).
tmp = np.arange(101, 137).reshape(12, 3)
# Rows 0,1,4,5,8,9 hold the "right" conditions and the remaining rows the
# "left" ones (presumably the Q1/Q2 vs Q3/Q4 split — confirm against the
# stimulus layout).
right_ids = tmp[np.r_[0:2, 4:6, 8:10]].ravel()
left_ids = tmp[np.r_[2:4, 6:8, 10:12]].ravel()
In [9]:
# Collapse the 18 "right" codes onto event 1 and the 18 "left" codes onto
# event 2.
rightLeft_events = mne.merge_events(mne.merge_events(events, right_ids, 1),
                                    left_ids, 2)
In [10]:
# Re-epoch around the merged right/left codes; decim=4 downsamples the
# epochs to make the decoding below faster.
picks = mne.pick_types(raw.info, meg=True, eog=False, misc=False)
event_id_rightLeft = {'Right': 1, 'Left': 2}
tmin, tmax = -0.05, 0.2
baseline = None
reject = {'mag': 4e-12}
epochs_rightLeft = mne.Epochs(raw, events=rightLeft_events,
                              event_id=event_id_rightLeft, tmin=tmin,
                              tmax=tmax, baseline=baseline, detrend=0,
                              reject=reject, decim=4, picks=picks)
In [11]:
# Time-resolved decoding of right vs left with a sliding logistic
# regression, scored with ROC-AUC.
X = epochs_rightLeft.get_data() # MEG signals: n_epochs, n_channels, n_times
y = epochs_rightLeft.events[:, 2] # target: 1 = 'Right', 2 = 'Left'
clf = make_pipeline(StandardScaler(), LogisticRegression())
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the AUC time course against the 0.5 chance level
fig, ax = plt.subplots()
ax.plot(epochs_rightLeft.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
plt.show()
In [12]:
# Temporal-generalization matrix for right vs left.  Two fixes relative to
# the original cell: (1) `scores` from the previous cell came from a
# SlidingEstimator and is therefore 1-D, which imshow cannot display, so
# the full train-time x test-time matrix is computed here with a
# GeneralizingEstimator (matching the In[17] cell for upper/lower);
# (2) imshow's `extent` needs exactly four values (left, right, bottom,
# top), not the whole times vector.
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc')
scores_gen = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores_gen = np.mean(scores_gen, axis=0)
# Plot the full matrix
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores_gen, interpolation='lanczos', origin='lower',
               cmap='RdBu_r', extent=epochs_rightLeft.times[[0, -1, 0, -1]],
               vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal Generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
plt.show()
In [ ]:
# Triplet table of trigger codes; rows cycle through Q1..Q4 within each
# band of 12 codes, so taking every fourth row gathers all nine codes of
# one quadrant.
tmp_id_Q = np.arange(101, 137).reshape(12, 3)
Q1_id = tmp_id_Q[0::4].ravel()
Q2_id = tmp_id_Q[1::4].ravel()
Q3_id = tmp_id_Q[2::4].ravel()
Q4_id = tmp_id_Q[3::4].ravel()
In [ ]:
# Collapse each quadrant's nine trigger codes onto one event code
# (Q1 -> 1, Q2 -> 2, Q3 -> 3, Q4 -> 4).
new_events_Q = events
for code, ids in enumerate((Q1_id, Q2_id, Q3_id, Q4_id), start=1):
    new_events_Q = mne.merge_events(new_events_Q, ids, code)
In [ ]:
# Epoch sets for every pairwise two-quadrant contrast.  The six original
# copy-pasted mne.Epochs calls are collapsed into one helper.
picks = mne.pick_types(raw.info, meg=True, eog=False, misc=False)
tmin, tmax = -0.05, 0.2
baseline = None
reject = {'mag': 4e-12}

def _quadrant_pair_epochs(qa, qb):
    """Epochs restricted to the two quadrant event codes qa and qb."""
    pair_id = {'Q%d' % qa: qa, 'Q%d' % qb: qb}
    return mne.Epochs(raw=raw, events=new_events_Q, event_id=pair_id,
                      tmin=tmin, tmax=tmax, proj=True, baseline=None,
                      detrend=0, preload=True, reject=reject, decim=4,
                      picks=picks)

epochs_MVP_Q12 = _quadrant_pair_epochs(1, 2)
epochs_MVP_Q13 = _quadrant_pair_epochs(1, 3)
epochs_MVP_Q14 = _quadrant_pair_epochs(1, 4)
epochs_MVP_Q23 = _quadrant_pair_epochs(2, 3)
epochs_MVP_Q24 = _quadrant_pair_epochs(2, 4)
epochs_MVP_Q34 = _quadrant_pair_epochs(3, 4)
In [ ]:
# Extract (data, labels) arrays for each pairwise contrast; the original
# twelve copy-pasted lines are collapsed into one helper.
def _decoding_arrays(ep):
    """Return (X, y): MEG data (n_epochs, n_channels, n_times) and event codes."""
    return ep.get_data(), ep.events[:, 2]

XQ12, yQ12 = _decoding_arrays(epochs_MVP_Q12)
XQ13, yQ13 = _decoding_arrays(epochs_MVP_Q13)
XQ14, yQ14 = _decoding_arrays(epochs_MVP_Q14)
XQ23, yQ23 = _decoding_arrays(epochs_MVP_Q23)
XQ24, yQ24 = _decoding_arrays(epochs_MVP_Q24)
XQ34, yQ34 = _decoding_arrays(epochs_MVP_Q34)
In [ ]:
# Temporal generalization for each pairwise quadrant contrast, with the
# repeated fit/score/average boilerplate factored into a helper.
scoring_function1 = 'roc_auc'
clf = make_pipeline(StandardScaler(), LogisticRegression())
# define the Temporal Generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring=scoring_function1)

def _mean_generalization_scores(X, y):
    """Train-time x test-time score matrix, averaged over 5 CV folds."""
    return np.mean(cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1),
                   axis=0)

scoresQ12 = _mean_generalization_scores(XQ12, yQ12)
scoresQ13 = _mean_generalization_scores(XQ13, yQ13)
scoresQ14 = _mean_generalization_scores(XQ14, yQ14)
scoresQ23 = _mean_generalization_scores(XQ23, yQ23)
scoresQ24 = _mean_generalization_scores(XQ24, yQ24)
scoresQ34 = _mean_generalization_scores(XQ34, yQ34)
In [ ]:
# Diagonal decoding curves (train time == test time) for all six pairwise
# quadrant contrasts, overlaid in one figure.
pair_results = ((epochs_MVP_Q12, scoresQ12, 'Q1 vs Q2'),
                (epochs_MVP_Q13, scoresQ13, 'Q1 vs Q3'),
                (epochs_MVP_Q14, scoresQ14, 'Q1 vs Q4'),
                (epochs_MVP_Q23, scoresQ23, 'Q2 vs Q3'),
                (epochs_MVP_Q24, scoresQ24, 'Q2 vs Q4'),
                (epochs_MVP_Q34, scoresQ34, 'Q3 vs Q4'))
fig, ax = plt.subplots()
for pair_epochs, pair_scores, pair_label in pair_results:
    ax.plot(pair_epochs.times, np.diag(pair_scores), label=pair_label)
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time in different quadrants')
plt.show()
In [ ]:
# Self-contained re-run of the earlier setup: epoching parameters, the
# quadrant code table and the quadrant-merged event array.
picks = mne.pick_types(raw.info, meg=True, eog=False, misc=False)
tmin, tmax = -0.05, 0.2
baseline = None
reject = {'mag': 4e-12}
tmp_id_Q = np.arange(101, 137).reshape(12, 3)
# Every fourth row of the triplet table belongs to the same quadrant.
Q1_id = tmp_id_Q[0::4].ravel()
Q2_id = tmp_id_Q[1::4].ravel()
Q3_id = tmp_id_Q[2::4].ravel()
Q4_id = tmp_id_Q[3::4].ravel()
new_events_Q = events
for code, ids in enumerate((Q1_id, Q2_id, Q3_id, Q4_id), start=1):
    new_events_Q = mne.merge_events(new_events_Q, ids, code)
In [ ]:
# Epochs for the Q1-vs-Q4 contrast only.
new_event_Q14_id = dict(Q1=1, Q4=4)
epochs_MVP_Q14 = mne.Epochs(raw=raw, events=new_events_Q,
                            event_id=new_event_Q14_id, tmin=tmin, tmax=tmax,
                            proj=True, baseline=None, detrend=0,
                            preload=True, reject=reject, decim=4,
                            picks=picks)
In [ ]:
XQ14 = epochs_MVP_Q14.get_data() # MEG signals: n_epochs, n_channels, n_times
yQ14 = epochs_MVP_Q14.events[:, 2] # target: 1 = Q1, 4 = Q4
In [ ]:
# Temporal generalization for the Q1-vs-Q4 contrast.
scoring_function1 = 'roc_auc'
clf = make_pipeline(StandardScaler(), LogisticRegression())
# NOTE(review): the original cell also built a SlidingEstimator
# (`time_decod`) that was never fitted or scored here; it has been removed
# as dead code.
# define the Temporal Generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring=scoring_function1)
# 14
scoresQ14 = cross_val_multiscore(time_gen, XQ14, yQ14, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scoresQ14 = np.mean(scoresQ14, axis=0)
In [ ]:
# Plot the diagonal (train time == test time) decoding curve for Q1 vs Q4
# against the 0.5 chance level.
fig, ax = plt.subplots()
ax.plot(epochs_MVP_Q14.times, np.diag(scoresQ14), label='Q1 vs Q4')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time in different quadrants')
plt.show()
In [ ]: